Github repository: https://github.com/GreatLearningAIML1/gl-pgp-aiml-uta-intl-aug20-adris-misra
Project - Bank Churn Prediction. Objective: Given a bank customer, build a neural-network-based classifier that can determine whether the customer will leave the bank or not in the next 6 months.
Domain | Learning Outcomes | Attribute Information
# Import warnings to suppress runtime warnings
import warnings
# Suppress all warning output so the notebook cells stay readable
warnings.filterwarnings('ignore')
# Import basic libraries for data and visualization
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
%matplotlib inline
# Import models and relevent class / functions
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, recall_score, precision_score, f1_score, roc_auc_score,accuracy_score
from sklearn.metrics import precision_recall_curve, auc, roc_curve
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from kerastuner.tuners import RandomSearch
from kerastuner.tuners import Hyperband
import hiplot as hip
from keras.utils.vis_utils import plot_model
from tensorflow.keras import optimizers
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
# Import other utilities
import os
import json
from pathlib import Path
import shutil
import shap
# Import data: bank customer records, one row per customer
cust = pd.read_csv("bank.csv")
# Inspect the first five rows as a sanity check
cust.head()
| RowNumber | CustomerId | Surname | CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 15634602 | Hargrave | 619 | France | Female | 42 | 2 | 0.00 | 1 | 1 | 1 | 101348.88 | 1 |
| 1 | 2 | 15647311 | Hill | 608 | Spain | Female | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 | 0 |
| 2 | 3 | 15619304 | Onio | 502 | France | Female | 42 | 8 | 159660.80 | 3 | 1 | 0 | 113931.57 | 1 |
| 3 | 4 | 15701354 | Boni | 699 | France | Female | 39 | 1 | 0.00 | 2 | 0 | 0 | 93826.63 | 0 |
| 4 | 5 | 15737888 | Mitchell | 850 | Spain | Female | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.10 | 0 |
# Inspect the last five rows as a sanity check
cust.tail()
| RowNumber | CustomerId | Surname | CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 9995 | 9996 | 15606229 | Obijiaku | 771 | France | Male | 39 | 5 | 0.00 | 2 | 1 | 0 | 96270.64 | 0 |
| 9996 | 9997 | 15569892 | Johnstone | 516 | France | Male | 35 | 10 | 57369.61 | 1 | 1 | 1 | 101699.77 | 0 |
| 9997 | 9998 | 15584532 | Liu | 709 | France | Female | 36 | 7 | 0.00 | 1 | 0 | 1 | 42085.58 | 1 |
| 9998 | 9999 | 15682355 | Sabbatini | 772 | Germany | Male | 42 | 3 | 75075.31 | 2 | 1 | 0 | 92888.52 | 1 |
| 9999 | 10000 | 15628319 | Walker | 792 | France | Female | 28 | 4 | 130142.79 | 1 | 1 | 0 | 38190.78 | 0 |
# shape of dataframe: (rows, columns)
cust.shape
(10000, 14)
# Column names, dtypes, non-null counts and memory footprint
cust.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 10000 entries, 0 to 9999 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 RowNumber 10000 non-null int64 1 CustomerId 10000 non-null int64 2 Surname 10000 non-null object 3 CreditScore 10000 non-null int64 4 Geography 10000 non-null object 5 Gender 10000 non-null object 6 Age 10000 non-null int64 7 Tenure 10000 non-null int64 8 Balance 10000 non-null float64 9 NumOfProducts 10000 non-null int64 10 HasCrCard 10000 non-null int64 11 IsActiveMember 10000 non-null int64 12 EstimatedSalary 10000 non-null float64 13 Exited 10000 non-null int64 dtypes: float64(2), int64(9), object(3) memory usage: 1.1+ MB
# Check for Missing Values per column (output below shows none)
cust.isnull().sum()
RowNumber 0 CustomerId 0 Surname 0 CreditScore 0 Geography 0 Gender 0 Age 0 Tenure 0 Balance 0 NumOfProducts 0 HasCrCard 0 IsActiveMember 0 EstimatedSalary 0 Exited 0 dtype: int64
# Transposed summary statistics (count/mean/std/quantiles) for numeric columns
cust.describe().T
| count | mean | std | min | 25% | 50% | 75% | max | |
|---|---|---|---|---|---|---|---|---|
| RowNumber | 10000.0 | 5.000500e+03 | 2886.895680 | 1.00 | 2500.75 | 5.000500e+03 | 7.500250e+03 | 10000.00 |
| CustomerId | 10000.0 | 1.569094e+07 | 71936.186123 | 15565701.00 | 15628528.25 | 1.569074e+07 | 1.575323e+07 | 15815690.00 |
| CreditScore | 10000.0 | 6.505288e+02 | 96.653299 | 350.00 | 584.00 | 6.520000e+02 | 7.180000e+02 | 850.00 |
| Age | 10000.0 | 3.892180e+01 | 10.487806 | 18.00 | 32.00 | 3.700000e+01 | 4.400000e+01 | 92.00 |
| Tenure | 10000.0 | 5.012800e+00 | 2.892174 | 0.00 | 3.00 | 5.000000e+00 | 7.000000e+00 | 10.00 |
| Balance | 10000.0 | 7.648589e+04 | 62397.405202 | 0.00 | 0.00 | 9.719854e+04 | 1.276442e+05 | 250898.09 |
| NumOfProducts | 10000.0 | 1.530200e+00 | 0.581654 | 1.00 | 1.00 | 1.000000e+00 | 2.000000e+00 | 4.00 |
| HasCrCard | 10000.0 | 7.055000e-01 | 0.455840 | 0.00 | 0.00 | 1.000000e+00 | 1.000000e+00 | 1.00 |
| IsActiveMember | 10000.0 | 5.151000e-01 | 0.499797 | 0.00 | 0.00 | 1.000000e+00 | 1.000000e+00 | 1.00 |
| EstimatedSalary | 10000.0 | 1.000902e+05 | 57510.492818 | 11.58 | 51002.11 | 1.001939e+05 | 1.493882e+05 | 199992.48 |
| Exited | 10000.0 | 2.037000e-01 | 0.402769 | 0.00 | 0.00 | 0.000000e+00 | 0.000000e+00 | 1.00 |
# Count of 0 in each column
# NOTE(review): the 3617 zero Balances and 413 zero Tenures look like real
# values (inactive/new accounts), not missing-value placeholders — confirm
(cust == 0).sum(axis=0)
RowNumber 0 CustomerId 0 Surname 0 CreditScore 0 Geography 0 Gender 0 Age 0 Tenure 413 Balance 3617 NumOfProducts 0 HasCrCard 2945 IsActiveMember 4849 EstimatedSalary 0 Exited 7963 dtype: int64
# Finding all duplicate rows and adding a dup column to find count.
# .copy() makes dup_cust an independent frame: assigning a new column to a
# boolean-indexed slice of `cust` is chained assignment — pandas emits
# SettingWithCopyWarning and the write may silently not stick.
dup_cust = cust[cust.duplicated(keep=False)].copy()
dup_cust['dup'] = 1
dup_cust
| RowNumber | CustomerId | Surname | CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited | dup |
|---|
Preliminary data analysis:
# Number of unique in each column
# (RowNumber/CustomerId are all-unique identifiers; EstimatedSalary near-unique)
cust.nunique()
RowNumber 10000 CustomerId 10000 Surname 2932 CreditScore 460 Geography 3 Gender 2 Age 70 Tenure 11 Balance 6382 NumOfProducts 4 HasCrCard 2 IsActiveMember 2 EstimatedSalary 9999 Exited 2 dtype: int64
# Make a copy of original dataframe and drop unwanted columns
# (RowNumber, CustomerId and Surname are identifiers with no predictive value)
customer_copy = cust.copy()
cust.drop(['RowNumber', 'CustomerId','Surname'], axis=1, inplace=True)
cust
| CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 619 | France | Female | 42 | 2 | 0.00 | 1 | 1 | 1 | 101348.88 | 1 |
| 1 | 608 | Spain | Female | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 | 0 |
| 2 | 502 | France | Female | 42 | 8 | 159660.80 | 3 | 1 | 0 | 113931.57 | 1 |
| 3 | 699 | France | Female | 39 | 1 | 0.00 | 2 | 0 | 0 | 93826.63 | 0 |
| 4 | 850 | Spain | Female | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.10 | 0 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 9995 | 771 | France | Male | 39 | 5 | 0.00 | 2 | 1 | 0 | 96270.64 | 0 |
| 9996 | 516 | France | Male | 35 | 10 | 57369.61 | 1 | 1 | 1 | 101699.77 | 0 |
| 9997 | 709 | France | Female | 36 | 7 | 0.00 | 1 | 0 | 1 | 42085.58 | 1 |
| 9998 | 772 | Germany | Male | 42 | 3 | 75075.31 | 2 | 1 | 0 | 92888.52 | 1 |
| 9999 | 792 | France | Female | 28 | 4 | 130142.79 | 1 | 1 | 0 | 38190.78 | 0 |
10000 rows × 11 columns
# Separate out numerical columns vs categorical columns.
# Everything not listed as numerical — including the 0/1 indicator
# columns and the target — is treated as categorical.
num_col = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary']
cat_col = list(set(cust.columns).difference(num_col))
print("Numerical columns: ", num_col)
print("Categorical columns:", cat_col)
Numerical columns: ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary'] Categorical columns: ['HasCrCard', 'Exited', 'Geography', 'IsActiveMember', 'Gender']
# Per-column frequency table for the categorical columns:
# absolute counts alongside percentage frequencies.
for col in cat_col:
    print('*********', col, '***********')
    counts = cust[col].value_counts()
    pct = cust[col].value_counts(normalize=True) * 100
    freq_table = pd.concat([counts, pct], axis=1)
    freq_table = freq_table.reset_index()
    freq_table.columns = [col, 'count', 'frequency%']
    print(freq_table)
    print()
********* HasCrCard *********** HasCrCard count frequency% 0 1 7055 70.55 1 0 2945 29.45 ********* Exited *********** Exited count frequency% 0 0 7963 79.63 1 1 2037 20.37 ********* Geography *********** Geography count frequency% 0 France 5014 50.14 1 Germany 2509 25.09 2 Spain 2477 24.77 ********* IsActiveMember *********** IsActiveMember count frequency% 0 1 5151 51.51 1 0 4849 48.49 ********* Gender *********** Gender count frequency% 0 Male 5457 54.57 1 Female 4543 45.43
# One count plot per categorical column, coloured by churn status;
# figure width scales with the number of category levels.
for col in cat_col:
    n_levels = cust[col].nunique()
    plt.figure(figsize=(n_levels * 1.5, 5))
    sns.countplot(x=cust[col], hue=cust['Exited'])
    plt.xlabel(col, size=20)
    plt.ylabel('Count', size=20)
    plt.xticks(rotation=90, size=15)
    plt.yticks(size=15)
    plt.show()
# Skewness of every numeric variable, most right-skewed first.
# numeric_only=True keeps this working on pandas >= 2.0, where skew()
# raises on the still-object columns (Geography, Gender) instead of
# silently skipping them as older pandas did.
cust.skew(numeric_only=True).sort_values(ascending=False)
Exited 1.471611 Age 1.011320 NumOfProducts 0.745568 Tenure 0.010991 EstimatedSalary 0.002085 IsActiveMember -0.060437 CreditScore -0.071607 Balance -0.141109 HasCrCard -0.901812 dtype: float64
# Kurtosis of every numeric variable, heaviest-tailed first.
# numeric_only=True keeps this working on pandas >= 2.0, where kurt()
# raises on the still-object columns (Geography, Gender).
cust.kurt(numeric_only=True).sort_values(ascending=False)
Age 1.395347 NumOfProducts 0.582981 Exited 0.165671 CreditScore -0.425726 Tenure -1.165225 EstimatedSalary -1.181518 HasCrCard -1.186973 Balance -1.489412 IsActiveMember -1.996747 dtype: float64
# Distribution of each continuous column, split by churn status.
# seaborn.distplot was deprecated in 0.11 and removed in 0.14; histplot
# with kde=True and stat='density' is the documented replacement.
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
for col in num_col:
    fig, (ax_stay, ax_churn) = plt.subplots(nrows=1, ncols=2, figsize=(16, 5))
    sns.histplot(x=cust[cust['Exited'] == 0][col], kde=True, stat='density', ax=ax_stay)
    ax_stay.set_title('Exited = 0', fontsize=18)
    ax_stay.set_xlabel(col, size=20)
    ax_stay.set_ylabel('Density', size=20)
    sns.histplot(x=cust[cust['Exited'] == 1][col], kde=True, stat='density', ax=ax_churn)
    ax_churn.set_title('Exited = 1', fontsize=18)
    ax_churn.set_xlabel(col, size=20)
    ax_churn.set_ylabel('Density', size=20)
    fig.tight_layout()
    plt.show()
plt.rcParams.update(plt.rcParamsDefault)
# Boxplot of each continuous column split by churn status to highlight outliers
for col in num_col:
    ax = sns.boxplot(x=cust['Exited'], y=cust[col])
    ax.set_xlabel('Exited', size=15)
    ax.set_ylabel(col, size=15)
    plt.show()
Observation:
# pairplot With Target in Hue (KDE on the diagonal)
sns.set_context(rc={"axes.labelsize":30})
sns.pairplot(cust, hue='Exited',diag_kind ='kde', height = 4);
# Restore default matplotlib settings after the enlarged-label context
plt.rcParams.update(plt.rcParamsDefault)
# pairplot including target, without hue split (KDE on the diagonal)
sns.set_context(rc={"axes.labelsize":30})
sns.pairplot(cust, diag_kind ='kde', height = 4);
# Restore default matplotlib settings after the enlarged-label context
plt.rcParams.update(plt.rcParamsDefault)
# Correlation matrix heatmap of the numeric columns.
# numeric_only=True is required on pandas >= 2.0, where corr() raises on
# the object columns (Geography, Gender) instead of skipping them.
plt.figure(figsize=(20,18))
sns.heatmap(cust.corr(numeric_only=True), annot=True, annot_kws={"size":15});
plt.xticks(rotation=90, size=12, color='blue');
plt.yticks(rotation=0, size=12, color='blue');
Observation:
# Convert the columns with an 'object' datatype into categorical variables
for col in cust.columns:  # Loop through all columns in the dataframe
    if cust[col].dtype == 'object':  # Only apply for columns with categorical strings
        cust[col] = pd.Categorical(cust[col])  # Store as pandas Categorical dtype (labels kept, not integer codes)
# checking sample head data after the dtype conversion (values unchanged)
cust.head(10)
| CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 619 | France | Female | 42 | 2 | 0.00 | 1 | 1 | 1 | 101348.88 | 1 |
| 1 | 608 | Spain | Female | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 | 0 |
| 2 | 502 | France | Female | 42 | 8 | 159660.80 | 3 | 1 | 0 | 113931.57 | 1 |
| 3 | 699 | France | Female | 39 | 1 | 0.00 | 2 | 0 | 0 | 93826.63 | 0 |
| 4 | 850 | Spain | Female | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.10 | 0 |
| 5 | 645 | Spain | Male | 44 | 8 | 113755.78 | 2 | 1 | 0 | 149756.71 | 1 |
| 6 | 822 | France | Male | 50 | 7 | 0.00 | 2 | 1 | 1 | 10062.80 | 0 |
| 7 | 376 | Germany | Female | 29 | 4 | 115046.74 | 4 | 1 | 0 | 119346.88 | 1 |
| 8 | 501 | France | Male | 44 | 4 | 142051.07 | 2 | 0 | 1 | 74940.50 | 0 |
| 9 | 684 | France | Male | 27 | 2 | 134603.88 | 1 | 1 | 1 | 71725.73 | 0 |
# Geography and Gender now show dtype 'category'; memory usage has dropped
cust.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 10000 entries, 0 to 9999 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 CreditScore 10000 non-null int64 1 Geography 10000 non-null category 2 Gender 10000 non-null category 3 Age 10000 non-null int64 4 Tenure 10000 non-null int64 5 Balance 10000 non-null float64 6 NumOfProducts 10000 non-null int64 7 HasCrCard 10000 non-null int64 8 IsActiveMember 10000 non-null int64 9 EstimatedSalary 10000 non-null float64 10 Exited 10000 non-null int64 dtypes: category(2), float64(2), int64(7) memory usage: 723.0 KB
# Defining independent and dependent variable
X = cust.drop(['Exited'],axis=1)
y = cust['Exited']
# convert categorical variables to dummy variables (one-hot encoding).
# NOTE(review): drop_first is not set, so Geography/Gender dummies are
# collinear — harmless for this NN, but confirm that is intended.
X = pd.get_dummies(X)
X.columns
Index(['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',
'IsActiveMember', 'EstimatedSalary', 'Geography_France',
'Geography_Germany', 'Geography_Spain', 'Gender_Female', 'Gender_Male'],
dtype='object')
# Inspect the fully numeric, one-hot-encoded feature matrix
X
| CreditScore | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Geography_France | Geography_Germany | Geography_Spain | Gender_Female | Gender_Male | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 619 | 42 | 2 | 0.00 | 1 | 1 | 1 | 101348.88 | 1 | 0 | 0 | 1 | 0 |
| 1 | 608 | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 | 0 | 0 | 1 | 1 | 0 |
| 2 | 502 | 42 | 8 | 159660.80 | 3 | 1 | 0 | 113931.57 | 1 | 0 | 0 | 1 | 0 |
| 3 | 699 | 39 | 1 | 0.00 | 2 | 0 | 0 | 93826.63 | 1 | 0 | 0 | 1 | 0 |
| 4 | 850 | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.10 | 0 | 0 | 1 | 1 | 0 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 9995 | 771 | 39 | 5 | 0.00 | 2 | 1 | 0 | 96270.64 | 1 | 0 | 0 | 0 | 1 |
| 9996 | 516 | 35 | 10 | 57369.61 | 1 | 1 | 1 | 101699.77 | 1 | 0 | 0 | 0 | 1 |
| 9997 | 709 | 36 | 7 | 0.00 | 1 | 0 | 1 | 42085.58 | 1 | 0 | 0 | 1 | 0 |
| 9998 | 772 | 42 | 3 | 75075.31 | 2 | 1 | 0 | 92888.52 | 0 | 1 | 0 | 0 | 1 |
| 9999 | 792 | 28 | 4 | 130142.79 | 1 | 1 | 0 | 38190.78 | 1 | 0 | 0 | 1 | 0 |
10000 rows × 13 columns
# Split the data into training, validation and test set in the ratio of 60:15:25 respectively:
# first hold out 25% for test, then 20% of the remaining 75% (= 15% overall) for validation
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=5)
X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.2, random_state=5)
# checking if distribution between train and test are similar to the original sample
# (stratify= was not passed to train_test_split, so verify class balance by eye)
print('y:\n', y.value_counts(normalize=True)*100)
print('\n\ny_train:\n', y_train.value_counts(normalize=True)*100)
print('\n\ny_val:\n', y_val.value_counts(normalize=True)*100)
print('\n\ny_test:\n', y_test.value_counts(normalize=True)*100)
y: 0 79.63 1 20.37 Name: Exited, dtype: float64 y_train: 0 79.4 1 20.6 Name: Exited, dtype: float64 y_val: 0 80.0 1 20.0 Name: Exited, dtype: float64 y_test: 0 79.96 1 20.04 Name: Exited, dtype: float64
# Scaling data.
# Fit the scaler on the training split ONLY, then reuse the fitted
# mean/std to transform validation and test. Calling fit_transform() on
# all three splits (as before) fits a different scaler per split, which
# leaks val/test statistics and puts the splits on inconsistent scales.
scale = StandardScaler()
X_train = scale.fit_transform(X_train)
X_val = scale.transform(X_val)
X_test = scale.transform(X_test)
X_train.shape
(6000, 13)
X_train
array([[-0.49194358, 0.00878354, -0.01039474, ..., 1.76981112,
1.11076771, -1.11076771],
[ 0.63858766, 0.39206541, -1.39635974, ..., -0.56503205,
-0.90027824, 0.90027824],
[ 1.28164213, 2.02101336, -0.01039474, ..., -0.56503205,
-0.90027824, 0.90027824],
...,
[-0.32599404, -0.7577802 , -1.04986849, ..., -0.56503205,
1.11076771, -1.11076771],
[ 1.62391306, -1.0452416 , 0.68258776, ..., 1.76981112,
1.11076771, -1.11076771],
[ 1.08457705, -0.08703692, -1.39635974, ..., -0.56503205,
1.11076771, -1.11076771]])
X_val.shape
(1500, 13)
X_val
array([[-1.44704782, 1.85917911, -0.66564414, ..., -0.60405063,
1.07767494, -1.07767494],
[-0.49343642, 0.9272909 , 0.7401513 , ..., -0.60405063,
-0.92792359, 0.92792359],
[ 0.72393985, -0.75010788, -0.31419528, ..., 1.65549037,
-0.92792359, 0.92792359],
...,
[ 0.87611188, -0.56373024, -0.31419528, ..., -0.60405063,
1.07767494, -1.07767494],
[ 1.92102651, -0.00459732, 0.03725358, ..., 1.65549037,
1.07767494, -1.07767494],
[ 0.9369807 , -0.3773526 , 0.38870244, ..., 1.65549037,
-0.92792359, 0.92792359]])
X_test.shape
(2500, 13)
X_test
array([[ 1.21956451, -0.08389144, 0.32646004, ..., 1.7339003 ,
-0.93266172, 0.93266172],
[-1.22785449, -0.466259 , 0.66751733, ..., -0.57673443,
1.07220011, -1.07220011],
[ 0.80298255, -0.466259 , 1.34963192, ..., -0.57673443,
1.07220011, -1.07220011],
...,
[-1.29034178, -0.65744278, -1.71988372, ..., 1.7339003 ,
1.07220011, -1.07220011],
[ 0.31349875, -0.37066711, -1.03776913, ..., -0.57673443,
1.07220011, -1.07220011],
[ 1.00085898, 0.10729234, -1.71988372, ..., -0.57673443,
-0.93266172, 0.93266172]])
# Converting the y Series of each split to plain numpy arrays for Keras
y_train= y_train.values
y_val= y_val.values
y_test = y_test.values
y_train
array([0, 0, 1, ..., 0, 1, 0], dtype=int64)
y_train.shape
(6000,)
y_val
array([0, 0, 0, ..., 0, 0, 0], dtype=int64)
y_val.shape
(1500,)
y_test
array([0, 0, 0, ..., 0, 1, 0], dtype=int64)
y_test.shape
(2500,)
# Define the whole network in a single Sequential constructor call:
# one hidden layer of 24 ReLU units over the 13 input features, and a
# single sigmoid output unit for the binary churn label.
model = Sequential([
    Dense(24, input_shape=(13,), activation='relu',
          kernel_initializer='normal', bias_initializer='zeros'),
    Dense(1, activation='sigmoid'),
])
# Optimizer and Loss function.
# `lr` is a deprecated alias in TF2 Keras; `learning_rate` is the
# supported keyword (same 1e-3 Adam step size as before).
opt = optimizers.Adam(learning_rate = 0.001)
lss = 'binary_crossentropy'
# Model compile: binary cross-entropy loss, tracking accuracy each epoch
model.compile(optimizer = opt, loss = lss, metrics=['accuracy'])
# Model summary: 13*24+24 = 336 params in the hidden layer, 24+1 = 25 in the output
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 24) 336 _________________________________________________________________ dense_1 (Dense) (None, 1) 25 ================================================================= Total params: 361 Trainable params: 361 Non-trainable params: 0 _________________________________________________________________
# Train for 200 epochs with batches of 1000 records (6 batches per epoch on
# the 6000-row training set), validating on the hold-out split every epoch
history = model.fit(X_train, y_train, batch_size=1000, epochs = 200, verbose = 1,validation_data=(X_val, y_val))
Epoch 1/200 6/6 [==============================] - 3s 343ms/step - loss: 0.6846 - accuracy: 0.5730 - val_loss: 0.6678 - val_accuracy: 0.6273 Epoch 2/200 6/6 [==============================] - 0s 11ms/step - loss: 0.6618 - accuracy: 0.6539 - val_loss: 0.6459 - val_accuracy: 0.6993 Epoch 3/200 6/6 [==============================] - 0s 12ms/step - loss: 0.6401 - accuracy: 0.7096 - val_loss: 0.6254 - val_accuracy: 0.7360 Epoch 4/200 6/6 [==============================] - 0s 10ms/step - loss: 0.6197 - accuracy: 0.7526 - val_loss: 0.6064 - val_accuracy: 0.7653 Epoch 5/200 6/6 [==============================] - 0s 10ms/step - loss: 0.6005 - accuracy: 0.7775 - val_loss: 0.5888 - val_accuracy: 0.7820 Epoch 6/200 6/6 [==============================] - 0s 10ms/step - loss: 0.5814 - accuracy: 0.7981 - val_loss: 0.5726 - val_accuracy: 0.7900 Epoch 7/200 6/6 [==============================] - 0s 10ms/step - loss: 0.5691 - accuracy: 0.8002 - val_loss: 0.5577 - val_accuracy: 0.7947 Epoch 8/200 6/6 [==============================] - 0s 11ms/step - loss: 0.5554 - accuracy: 0.8030 - val_loss: 0.5441 - val_accuracy: 0.7953 Epoch 9/200 6/6 [==============================] - 0s 11ms/step - loss: 0.5364 - accuracy: 0.8132 - val_loss: 0.5317 - val_accuracy: 0.8020 Epoch 10/200 6/6 [==============================] - 0s 10ms/step - loss: 0.5282 - accuracy: 0.8088 - val_loss: 0.5204 - val_accuracy: 0.8007 Epoch 11/200 6/6 [==============================] - 0s 10ms/step - loss: 0.5131 - accuracy: 0.8124 - val_loss: 0.5101 - val_accuracy: 0.8027 Epoch 12/200 6/6 [==============================] - 0s 10ms/step - loss: 0.5078 - accuracy: 0.8049 - val_loss: 0.5008 - val_accuracy: 0.8040 Epoch 13/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4966 - accuracy: 0.8088 - val_loss: 0.4923 - val_accuracy: 0.8053 Epoch 14/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4922 - accuracy: 0.8043 - val_loss: 0.4847 - val_accuracy: 0.8053 Epoch 15/200 6/6 
[==============================] - 0s 9ms/step - loss: 0.4770 - accuracy: 0.8095 - val_loss: 0.4778 - val_accuracy: 0.8040 Epoch 16/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4743 - accuracy: 0.8075 - val_loss: 0.4717 - val_accuracy: 0.8060 Epoch 17/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4692 - accuracy: 0.8090 - val_loss: 0.4662 - val_accuracy: 0.8073 Epoch 18/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4651 - accuracy: 0.8100 - val_loss: 0.4611 - val_accuracy: 0.8107 Epoch 19/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4589 - accuracy: 0.8111 - val_loss: 0.4565 - val_accuracy: 0.8113 Epoch 20/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4573 - accuracy: 0.8053 - val_loss: 0.4524 - val_accuracy: 0.8113 Epoch 21/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4572 - accuracy: 0.8056 - val_loss: 0.4486 - val_accuracy: 0.8113 Epoch 22/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4515 - accuracy: 0.8066 - val_loss: 0.4452 - val_accuracy: 0.8107 Epoch 23/200 6/6 [==============================] - 0s 11ms/step - loss: 0.4462 - accuracy: 0.8120 - val_loss: 0.4420 - val_accuracy: 0.8093 Epoch 24/200 6/6 [==============================] - 0s 11ms/step - loss: 0.4377 - accuracy: 0.8150 - val_loss: 0.4390 - val_accuracy: 0.8093 Epoch 25/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4398 - accuracy: 0.8099 - val_loss: 0.4363 - val_accuracy: 0.8113 Epoch 26/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4359 - accuracy: 0.8138 - val_loss: 0.4338 - val_accuracy: 0.8127 Epoch 27/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4339 - accuracy: 0.8101 - val_loss: 0.4315 - val_accuracy: 0.8113 Epoch 28/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4224 - accuracy: 0.8202 - val_loss: 0.4294 - val_accuracy: 0.8107 Epoch 29/200 6/6 
[==============================] - 0s 10ms/step - loss: 0.4259 - accuracy: 0.8161 - val_loss: 0.4274 - val_accuracy: 0.8120 Epoch 30/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4308 - accuracy: 0.8085 - val_loss: 0.4255 - val_accuracy: 0.8133 Epoch 31/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4169 - accuracy: 0.8221 - val_loss: 0.4238 - val_accuracy: 0.8160 Epoch 32/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4209 - accuracy: 0.8191 - val_loss: 0.4220 - val_accuracy: 0.8167 Epoch 33/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4307 - accuracy: 0.8115 - val_loss: 0.4204 - val_accuracy: 0.8160 Epoch 34/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4269 - accuracy: 0.8118 - val_loss: 0.4188 - val_accuracy: 0.8187 Epoch 35/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4196 - accuracy: 0.8152 - val_loss: 0.4172 - val_accuracy: 0.8200 Epoch 36/200 6/6 [==============================] - 0s 14ms/step - loss: 0.4200 - accuracy: 0.8188 - val_loss: 0.4157 - val_accuracy: 0.8193 Epoch 37/200 6/6 [==============================] - 0s 15ms/step - loss: 0.4238 - accuracy: 0.8165 - val_loss: 0.4143 - val_accuracy: 0.8193 Epoch 38/200 6/6 [==============================] - 0s 12ms/step - loss: 0.4193 - accuracy: 0.8162 - val_loss: 0.4129 - val_accuracy: 0.8207 Epoch 39/200 6/6 [==============================] - 0s 12ms/step - loss: 0.4145 - accuracy: 0.8162 - val_loss: 0.4113 - val_accuracy: 0.8207 Epoch 40/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4079 - accuracy: 0.8211 - val_loss: 0.4098 - val_accuracy: 0.8207 Epoch 41/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4207 - accuracy: 0.8168 - val_loss: 0.4085 - val_accuracy: 0.8213 Epoch 42/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4166 - accuracy: 0.8154 - val_loss: 0.4071 - val_accuracy: 0.8220 Epoch 43/200 6/6 
[==============================] - 0s 10ms/step - loss: 0.4056 - accuracy: 0.8218 - val_loss: 0.4057 - val_accuracy: 0.8227 Epoch 44/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4093 - accuracy: 0.8216 - val_loss: 0.4044 - val_accuracy: 0.8233 Epoch 45/200 6/6 [==============================] - 0s 11ms/step - loss: 0.4046 - accuracy: 0.8268 - val_loss: 0.4030 - val_accuracy: 0.8247 Epoch 46/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4078 - accuracy: 0.8237 - val_loss: 0.4017 - val_accuracy: 0.8253 Epoch 47/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4144 - accuracy: 0.8180 - val_loss: 0.4003 - val_accuracy: 0.8260 Epoch 48/200 6/6 [==============================] - 0s 11ms/step - loss: 0.4052 - accuracy: 0.8219 - val_loss: 0.3989 - val_accuracy: 0.8280 Epoch 49/200 6/6 [==============================] - 0s 9ms/step - loss: 0.4022 - accuracy: 0.8257 - val_loss: 0.3975 - val_accuracy: 0.8260 Epoch 50/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3991 - accuracy: 0.8268 - val_loss: 0.3960 - val_accuracy: 0.8280 Epoch 51/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3996 - accuracy: 0.8263 - val_loss: 0.3946 - val_accuracy: 0.8287 Epoch 52/200 6/6 [==============================] - 0s 10ms/step - loss: 0.4001 - accuracy: 0.8250 - val_loss: 0.3933 - val_accuracy: 0.8293 Epoch 53/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3870 - accuracy: 0.8296 - val_loss: 0.3919 - val_accuracy: 0.8307 Epoch 54/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3977 - accuracy: 0.8271 - val_loss: 0.3905 - val_accuracy: 0.8307 Epoch 55/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3991 - accuracy: 0.8270 - val_loss: 0.3893 - val_accuracy: 0.8307 Epoch 56/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3944 - accuracy: 0.8276 - val_loss: 0.3879 - val_accuracy: 0.8313 Epoch 57/200 6/6 
[==============================] - 0s 9ms/step - loss: 0.3905 - accuracy: 0.8327 - val_loss: 0.3864 - val_accuracy: 0.8320 Epoch 58/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3899 - accuracy: 0.8293 - val_loss: 0.3850 - val_accuracy: 0.8347 Epoch 59/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3839 - accuracy: 0.8328 - val_loss: 0.3836 - val_accuracy: 0.8353 Epoch 60/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3940 - accuracy: 0.8276 - val_loss: 0.3821 - val_accuracy: 0.8380 Epoch 61/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3838 - accuracy: 0.8351 - val_loss: 0.3808 - val_accuracy: 0.8387 Epoch 62/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3913 - accuracy: 0.8309 - val_loss: 0.3794 - val_accuracy: 0.8420 Epoch 63/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3792 - accuracy: 0.8382 - val_loss: 0.3782 - val_accuracy: 0.8420 Epoch 64/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3836 - accuracy: 0.8346 - val_loss: 0.3768 - val_accuracy: 0.8440 Epoch 65/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3838 - accuracy: 0.8331 - val_loss: 0.3754 - val_accuracy: 0.8427 Epoch 66/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3809 - accuracy: 0.8360 - val_loss: 0.3743 - val_accuracy: 0.8427 Epoch 67/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3815 - accuracy: 0.8371 - val_loss: 0.3732 - val_accuracy: 0.8440 Epoch 68/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3827 - accuracy: 0.8353 - val_loss: 0.3721 - val_accuracy: 0.8447 Epoch 69/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3848 - accuracy: 0.8358 - val_loss: 0.3711 - val_accuracy: 0.8467 Epoch 70/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3773 - accuracy: 0.8390 - val_loss: 0.3700 - val_accuracy: 0.8473 Epoch 71/200 6/6 [==============================] 
- 0s 11ms/step - loss: 0.3860 - accuracy: 0.8366 - val_loss: 0.3688 - val_accuracy: 0.8480 Epoch 72/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3800 - accuracy: 0.8364 - val_loss: 0.3676 - val_accuracy: 0.8480 Epoch 73/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3770 - accuracy: 0.8407 - val_loss: 0.3666 - val_accuracy: 0.8487 Epoch 74/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3766 - accuracy: 0.8438 - val_loss: 0.3655 - val_accuracy: 0.8493 Epoch 75/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3822 - accuracy: 0.8357 - val_loss: 0.3645 - val_accuracy: 0.8500 Epoch 76/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3771 - accuracy: 0.8400 - val_loss: 0.3636 - val_accuracy: 0.8500 Epoch 77/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3679 - accuracy: 0.8455 - val_loss: 0.3626 - val_accuracy: 0.8493 Epoch 78/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3709 - accuracy: 0.8446 - val_loss: 0.3617 - val_accuracy: 0.8500 Epoch 79/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3691 - accuracy: 0.8445 - val_loss: 0.3610 - val_accuracy: 0.8507 Epoch 80/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3705 - accuracy: 0.8454 - val_loss: 0.3602 - val_accuracy: 0.8507 Epoch 81/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3660 - accuracy: 0.8449 - val_loss: 0.3593 - val_accuracy: 0.8520 Epoch 82/200 6/6 [==============================] - 0s 19ms/step - loss: 0.3698 - accuracy: 0.8426 - val_loss: 0.3585 - val_accuracy: 0.8513 Epoch 83/200 6/6 [==============================] - 0s 14ms/step - loss: 0.3655 - accuracy: 0.8460 - val_loss: 0.3575 - val_accuracy: 0.8513 Epoch 84/200 6/6 [==============================] - 0s 16ms/step - loss: 0.3687 - accuracy: 0.8449 - val_loss: 0.3568 - val_accuracy: 0.8513 Epoch 85/200 6/6 [==============================] - 0s 10ms/step - loss: 
0.3706 - accuracy: 0.8445 - val_loss: 0.3561 - val_accuracy: 0.8520 Epoch 86/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3700 - accuracy: 0.8419 - val_loss: 0.3555 - val_accuracy: 0.8520 Epoch 87/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3610 - accuracy: 0.8477 - val_loss: 0.3547 - val_accuracy: 0.8533 Epoch 88/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3682 - accuracy: 0.8449 - val_loss: 0.3541 - val_accuracy: 0.8540 Epoch 89/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3687 - accuracy: 0.8417 - val_loss: 0.3537 - val_accuracy: 0.8520 Epoch 90/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3713 - accuracy: 0.8422 - val_loss: 0.3531 - val_accuracy: 0.8527 Epoch 91/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3686 - accuracy: 0.8455 - val_loss: 0.3522 - val_accuracy: 0.8520 Epoch 92/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3652 - accuracy: 0.8525 - val_loss: 0.3515 - val_accuracy: 0.8553 Epoch 93/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3577 - accuracy: 0.8499 - val_loss: 0.3508 - val_accuracy: 0.8567 Epoch 94/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3672 - accuracy: 0.8464 - val_loss: 0.3502 - val_accuracy: 0.8573 Epoch 95/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3629 - accuracy: 0.8494 - val_loss: 0.3497 - val_accuracy: 0.8587 Epoch 96/200 6/6 [==============================] - 0s 19ms/step - loss: 0.3622 - accuracy: 0.8499 - val_loss: 0.3490 - val_accuracy: 0.8580 Epoch 97/200 6/6 [==============================] - 0s 13ms/step - loss: 0.3528 - accuracy: 0.8571 - val_loss: 0.3486 - val_accuracy: 0.8573 Epoch 98/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3539 - accuracy: 0.8518 - val_loss: 0.3481 - val_accuracy: 0.8567 Epoch 99/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3691 - accuracy: 0.8434 
- val_loss: 0.3477 - val_accuracy: 0.8573 Epoch 100/200 6/6 [==============================] - 0s 13ms/step - loss: 0.3650 - accuracy: 0.8499 - val_loss: 0.3472 - val_accuracy: 0.8587 Epoch 101/200 6/6 [==============================] - 0s 13ms/step - loss: 0.3466 - accuracy: 0.8605 - val_loss: 0.3466 - val_accuracy: 0.8587 Epoch 102/200 6/6 [==============================] - 0s 13ms/step - loss: 0.3527 - accuracy: 0.8551 - val_loss: 0.3463 - val_accuracy: 0.8587 Epoch 103/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3566 - accuracy: 0.8503 - val_loss: 0.3458 - val_accuracy: 0.8600 Epoch 104/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3529 - accuracy: 0.8540 - val_loss: 0.3453 - val_accuracy: 0.8600 Epoch 105/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3554 - accuracy: 0.8517 - val_loss: 0.3449 - val_accuracy: 0.8600 Epoch 106/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3513 - accuracy: 0.8548 - val_loss: 0.3446 - val_accuracy: 0.8593 Epoch 107/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3555 - accuracy: 0.8530 - val_loss: 0.3437 - val_accuracy: 0.8607 Epoch 108/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3476 - accuracy: 0.8571 - val_loss: 0.3434 - val_accuracy: 0.8620 Epoch 109/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3472 - accuracy: 0.8610 - val_loss: 0.3433 - val_accuracy: 0.8587 Epoch 110/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3559 - accuracy: 0.8520 - val_loss: 0.3430 - val_accuracy: 0.8593 Epoch 111/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3541 - accuracy: 0.8542 - val_loss: 0.3425 - val_accuracy: 0.8613 Epoch 112/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3524 - accuracy: 0.8575 - val_loss: 0.3421 - val_accuracy: 0.8620 Epoch 113/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3420 - accuracy: 0.8622 - val_loss: 
0.3418 - val_accuracy: 0.8627 Epoch 114/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3476 - accuracy: 0.8558 - val_loss: 0.3416 - val_accuracy: 0.8613 Epoch 115/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3512 - accuracy: 0.8561 - val_loss: 0.3411 - val_accuracy: 0.8620 Epoch 116/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3550 - accuracy: 0.8576 - val_loss: 0.3407 - val_accuracy: 0.8620 Epoch 117/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3486 - accuracy: 0.8567 - val_loss: 0.3405 - val_accuracy: 0.8620 Epoch 118/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3515 - accuracy: 0.8555 - val_loss: 0.3402 - val_accuracy: 0.8627 Epoch 119/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3506 - accuracy: 0.8547 - val_loss: 0.3399 - val_accuracy: 0.8633 Epoch 120/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3506 - accuracy: 0.8543 - val_loss: 0.3396 - val_accuracy: 0.8627 Epoch 121/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3508 - accuracy: 0.8548 - val_loss: 0.3393 - val_accuracy: 0.8627 Epoch 122/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3501 - accuracy: 0.8538 - val_loss: 0.3390 - val_accuracy: 0.8627 Epoch 123/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3551 - accuracy: 0.8491 - val_loss: 0.3388 - val_accuracy: 0.8620 Epoch 124/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3448 - accuracy: 0.8573 - val_loss: 0.3384 - val_accuracy: 0.8620 Epoch 125/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3516 - accuracy: 0.8545 - val_loss: 0.3381 - val_accuracy: 0.8627 Epoch 126/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3545 - accuracy: 0.8524 - val_loss: 0.3380 - val_accuracy: 0.8647 Epoch 127/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3484 - accuracy: 0.8552 - val_loss: 0.3377 - 
val_accuracy: 0.8653 Epoch 128/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3480 - accuracy: 0.8555 - val_loss: 0.3375 - val_accuracy: 0.8660 Epoch 129/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3406 - accuracy: 0.8641 - val_loss: 0.3372 - val_accuracy: 0.8647 Epoch 130/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3413 - accuracy: 0.8553 - val_loss: 0.3371 - val_accuracy: 0.8647 Epoch 131/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3478 - accuracy: 0.8542 - val_loss: 0.3371 - val_accuracy: 0.8653 Epoch 132/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3411 - accuracy: 0.8612 - val_loss: 0.3367 - val_accuracy: 0.8660 Epoch 133/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3488 - accuracy: 0.8552 - val_loss: 0.3364 - val_accuracy: 0.8660 Epoch 134/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3446 - accuracy: 0.8561 - val_loss: 0.3363 - val_accuracy: 0.8653 Epoch 135/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3520 - accuracy: 0.8525 - val_loss: 0.3362 - val_accuracy: 0.8653 Epoch 136/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3445 - accuracy: 0.8563 - val_loss: 0.3358 - val_accuracy: 0.8633 Epoch 137/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3415 - accuracy: 0.8605 - val_loss: 0.3356 - val_accuracy: 0.8653 Epoch 138/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3421 - accuracy: 0.8620 - val_loss: 0.3354 - val_accuracy: 0.8660 Epoch 139/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3382 - accuracy: 0.8620 - val_loss: 0.3351 - val_accuracy: 0.8660 Epoch 140/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3367 - accuracy: 0.8605 - val_loss: 0.3351 - val_accuracy: 0.8660 Epoch 141/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3475 - accuracy: 0.8556 - val_loss: 0.3350 - val_accuracy: 
0.8653 Epoch 142/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3441 - accuracy: 0.8581 - val_loss: 0.3350 - val_accuracy: 0.8660 Epoch 143/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3504 - accuracy: 0.8517 - val_loss: 0.3347 - val_accuracy: 0.8673 Epoch 144/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3423 - accuracy: 0.8601 - val_loss: 0.3344 - val_accuracy: 0.8667 Epoch 145/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3411 - accuracy: 0.8582 - val_loss: 0.3343 - val_accuracy: 0.8660 Epoch 146/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3382 - accuracy: 0.8582 - val_loss: 0.3341 - val_accuracy: 0.8653 Epoch 147/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3465 - accuracy: 0.8596 - val_loss: 0.3341 - val_accuracy: 0.8667 Epoch 148/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3378 - accuracy: 0.8617 - val_loss: 0.3338 - val_accuracy: 0.8680 Epoch 149/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3403 - accuracy: 0.8612 - val_loss: 0.3337 - val_accuracy: 0.8680 Epoch 150/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3414 - accuracy: 0.8575 - val_loss: 0.3338 - val_accuracy: 0.8687 Epoch 151/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3406 - accuracy: 0.8616 - val_loss: 0.3338 - val_accuracy: 0.8687 Epoch 152/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3421 - accuracy: 0.8611 - val_loss: 0.3337 - val_accuracy: 0.8660 Epoch 153/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3424 - accuracy: 0.8616 - val_loss: 0.3333 - val_accuracy: 0.8673 Epoch 154/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3441 - accuracy: 0.8598 - val_loss: 0.3331 - val_accuracy: 0.8680 Epoch 155/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3410 - accuracy: 0.8596 - val_loss: 0.3331 - val_accuracy: 0.8680 
Epoch 156/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3498 - accuracy: 0.8555 - val_loss: 0.3329 - val_accuracy: 0.8687 Epoch 157/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3376 - accuracy: 0.8615 - val_loss: 0.3326 - val_accuracy: 0.8673 Epoch 158/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3455 - accuracy: 0.8580 - val_loss: 0.3326 - val_accuracy: 0.8680 Epoch 159/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3445 - accuracy: 0.8566 - val_loss: 0.3328 - val_accuracy: 0.8687 Epoch 160/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3358 - accuracy: 0.8642 - val_loss: 0.3328 - val_accuracy: 0.8687 Epoch 161/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3407 - accuracy: 0.8573 - val_loss: 0.3326 - val_accuracy: 0.8673 Epoch 162/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3468 - accuracy: 0.8587 - val_loss: 0.3325 - val_accuracy: 0.8687 Epoch 163/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3464 - accuracy: 0.8583 - val_loss: 0.3322 - val_accuracy: 0.8673 Epoch 164/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3401 - accuracy: 0.8597 - val_loss: 0.3324 - val_accuracy: 0.8687 Epoch 165/200 6/6 [==============================] - 0s 18ms/step - loss: 0.3315 - accuracy: 0.8676 - val_loss: 0.3322 - val_accuracy: 0.8680 Epoch 166/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3360 - accuracy: 0.8631 - val_loss: 0.3322 - val_accuracy: 0.8680 Epoch 167/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3386 - accuracy: 0.8604 - val_loss: 0.3321 - val_accuracy: 0.8680 Epoch 168/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3319 - accuracy: 0.8644 - val_loss: 0.3319 - val_accuracy: 0.8680 Epoch 169/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3387 - accuracy: 0.8618 - val_loss: 0.3317 - val_accuracy: 0.8673 Epoch 
170/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3390 - accuracy: 0.8581 - val_loss: 0.3317 - val_accuracy: 0.8667 Epoch 171/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3367 - accuracy: 0.8613 - val_loss: 0.3320 - val_accuracy: 0.8667 Epoch 172/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3431 - accuracy: 0.8574 - val_loss: 0.3321 - val_accuracy: 0.8673 Epoch 173/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3287 - accuracy: 0.8665 - val_loss: 0.3317 - val_accuracy: 0.8673 Epoch 174/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3392 - accuracy: 0.8613 - val_loss: 0.3316 - val_accuracy: 0.8660 Epoch 175/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3409 - accuracy: 0.8581 - val_loss: 0.3317 - val_accuracy: 0.8660 Epoch 176/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3385 - accuracy: 0.8575 - val_loss: 0.3315 - val_accuracy: 0.8660 Epoch 177/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3422 - accuracy: 0.8591 - val_loss: 0.3315 - val_accuracy: 0.8667 Epoch 178/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3297 - accuracy: 0.8670 - val_loss: 0.3315 - val_accuracy: 0.8660 Epoch 179/200 6/6 [==============================] - 0s 13ms/step - loss: 0.3348 - accuracy: 0.8603 - val_loss: 0.3315 - val_accuracy: 0.8653 Epoch 180/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3378 - accuracy: 0.8593 - val_loss: 0.3315 - val_accuracy: 0.8667 Epoch 181/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3353 - accuracy: 0.8644 - val_loss: 0.3312 - val_accuracy: 0.8680 Epoch 182/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3324 - accuracy: 0.8645 - val_loss: 0.3311 - val_accuracy: 0.8667 Epoch 183/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3401 - accuracy: 0.8611 - val_loss: 0.3314 - val_accuracy: 0.8667 Epoch 184/200 6/6 
[==============================] - 0s 10ms/step - loss: 0.3348 - accuracy: 0.8622 - val_loss: 0.3315 - val_accuracy: 0.8673 Epoch 185/200 6/6 [==============================] - 0s 12ms/step - loss: 0.3439 - accuracy: 0.8557 - val_loss: 0.3313 - val_accuracy: 0.8693 Epoch 186/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3389 - accuracy: 0.8599 - val_loss: 0.3312 - val_accuracy: 0.8660 Epoch 187/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3445 - accuracy: 0.8601 - val_loss: 0.3310 - val_accuracy: 0.8653 Epoch 188/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3391 - accuracy: 0.8653 - val_loss: 0.3309 - val_accuracy: 0.8667 Epoch 189/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3461 - accuracy: 0.8565 - val_loss: 0.3310 - val_accuracy: 0.8667 Epoch 190/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3360 - accuracy: 0.8584 - val_loss: 0.3311 - val_accuracy: 0.8667 Epoch 191/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3362 - accuracy: 0.8604 - val_loss: 0.3310 - val_accuracy: 0.8673 Epoch 192/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3316 - accuracy: 0.8647 - val_loss: 0.3307 - val_accuracy: 0.8660 Epoch 193/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3400 - accuracy: 0.8582 - val_loss: 0.3308 - val_accuracy: 0.8687 Epoch 194/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3406 - accuracy: 0.8598 - val_loss: 0.3307 - val_accuracy: 0.8687 Epoch 195/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3342 - accuracy: 0.8630 - val_loss: 0.3308 - val_accuracy: 0.8687 Epoch 196/200 6/6 [==============================] - 0s 9ms/step - loss: 0.3404 - accuracy: 0.8588 - val_loss: 0.3308 - val_accuracy: 0.8680 Epoch 197/200 6/6 [==============================] - 0s 11ms/step - loss: 0.3335 - accuracy: 0.8660 - val_loss: 0.3310 - val_accuracy: 0.8680 Epoch 198/200 6/6 
[==============================] - 0s 10ms/step - loss: 0.3364 - accuracy: 0.8653 - val_loss: 0.3308 - val_accuracy: 0.8680 Epoch 199/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3439 - accuracy: 0.8590 - val_loss: 0.3308 - val_accuracy: 0.8680 Epoch 200/200 6/6 [==============================] - 0s 10ms/step - loss: 0.3365 - accuracy: 0.8601 - val_loss: 0.3308 - val_accuracy: 0.8687
# Collect the Keras training history into a DataFrame and overlay the
# training vs. validation loss curves to inspect convergence/overfitting.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
for series in ('loss', 'val_loss'):
    plt.plot(hist[series])
plt.legend(("train" , "valid") , loc =0)
<matplotlib.legend.Legend at 0x182dfae2280>
# Overlay training vs. validation accuracy from the same history DataFrame.
for series in ('accuracy', 'val_accuracy'):
    plt.plot(hist[series])
plt.legend(("train" , "valid") , loc =0)
<matplotlib.legend.Legend at 0x182de941e50>
**Observation:**
# Baseline comparison: Logistic Regression fit on the same splits as the NN.
logreg = LogisticRegression(n_jobs=-1)
logreg.fit(X_train, y_train)
# Report accuracy on each split (score() returns mean accuracy).
for label, features, target in (("Training", X_train, y_train),
                                ("Validation", X_val, y_val),
                                ("Test", X_test, y_test)):
    print(label + " Score: ", logreg.score(features, target))
Training Score: 0.81 Validation Score: 0.8146666666666667 Test Score: 0.8144
# Baseline comparison: Random Forest (depth-limited to curb overfitting).
rfc = RandomForestClassifier(max_depth=10, n_jobs=None, random_state=5)
rfc.fit(X_train, y_train)
# Report accuracy on each split.
for label, features, target in (("Training", X_train, y_train),
                                ("Validation", X_val, y_val),
                                ("Test", X_test, y_test)):
    print(label + " Score: ", rfc.score(features, target))
Training Score: 0.9083333333333333 Validation Score: 0.8766666666666667 Test Score: 0.8616
# Baseline comparison: Gradient Boosting with a small learning rate.
gbc = GradientBoostingClassifier(n_estimators = 250, learning_rate=0.01, random_state=5)
gbc = gbc.fit(X_train, y_train)
# Report accuracy on each split.
for label, features, target in (("Training", X_train, y_train),
                                ("Validation", X_val, y_val),
                                ("Test", X_test, y_test)):
    print(label + " Score: ", gbc.score(features, target))
Training Score: 0.8601666666666666 Validation Score: 0.8773333333333333 Test Score: 0.8572
**Observation:**
# Build hypermodel for Keras Tuner
def build_model(hp):
    """Construct a tunable Sequential binary classifier.

    Searchable hyperparameters: width and activation of the first hidden
    layer, 0-2 additional hidden layers (width/activation each), the
    dropout rate, and the Adam learning rate.

    Args:
        hp: Keras Tuner HyperParameters object supplied by the tuner.

    Returns:
        A compiled tf.keras Sequential model.
    """
    activations = ['relu', 'tanh', 'elu']
    net = Sequential()
    # First hidden layer, wired to the 13 engineered input features.
    net.add(Dense(units=hp.Int('units_in', min_value=10, max_value=100, step=10),
                  input_shape=(13,),
                  activation=hp.Choice('act_in', activations)))
    # Optionally stack up to two more hidden layers.
    for idx in range(hp.Int('num_layers', 0, 2)):
        net.add(Dense(units=hp.Int('units_' + str(idx),
                                   min_value=10, max_value=100, step=10),
                      activation=hp.Choice('act_' + str(idx), activations)))
    # Dropout for regularization, then a sigmoid output for churn probability.
    net.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.3)))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(
        optimizer=optimizers.Adam(
            hp.Choice('learning_rate', [0.01, 0.005, 0.001, 0.0005, 0.0001])),
        loss='binary_crossentropy',
        metrics=['accuracy'])
    return net
# Remove any previous Random Search results so the tuner starts fresh
# (Path.is_dir() already returns False for a nonexistent path).
dirpath = Path('C:/AI-ML/UTA/BankChurn', 'Random_Search')
if dirpath.is_dir():
    shutil.rmtree(dirpath)
# Configure the Random Search tuner over the hypermodel: 5 trials,
# each averaged over 2 training runs, optimizing validation accuracy.
tunerRS = RandomSearch(build_model,
                       objective='val_accuracy',
                       max_trials=5,
                       executions_per_trial=2,
                       seed=5,
                       directory='C:/AI-ML/UTA/BankChurn',
                       project_name='Random_Search')
# Show the hyperparameter search space
tunerRS.search_space_summary()
Search space summary
Default search space size: 4
units_in (Int)
{'default': None, 'conditions': [], 'min_value': 10, 'max_value': 100, 'step': 10, 'sampling': None}
act_in (Choice)
{'default': 'relu', 'conditions': [], 'values': ['relu', 'tanh', 'elu'], 'ordered': False}
num_layers (Int)
{'default': None, 'conditions': [], 'min_value': 0, 'max_value': 2, 'step': 1, 'sampling': None}
learning_rate (Choice)
{'default': 0.01, 'conditions': [], 'values': [0.01, 0.005, 0.001, 0.0005, 0.0001], 'ordered': True}
# Searching for best parameters: each trial trains with the same
# batch-size/epoch budget and is scored on the validation split.
tunerRS.search(X_train, y_train,
               batch_size=1000, epochs=200,
               validation_data=(X_val, y_val))
Trial 5 Complete [00h 00m 26s] val_accuracy: 0.8146666586399078 Best val_accuracy So Far: 0.8819999992847443 Total elapsed time: 00h 02m 17s INFO:tensorflow:Oracle triggered exit
# Summary of best results. results_summary() prints directly and returns
# None, so wrapping it in print() only added a stray "None" line to the
# output (visible at the end of the captured cell output).
tunerRS.results_summary()
Results summary Results in C:/AI-ML/UTA/BankChurn\Random_Search Showing 10 best trials Objective(name='val_accuracy', direction='max') Trial summary Hyperparameters: units_in: 90 act_in: elu num_layers: 1 learning_rate: 0.005 units_0: 70 act_0: relu dropout: 0.30000000000000004 Score: 0.8819999992847443 Trial summary Hyperparameters: units_in: 40 act_in: elu num_layers: 0 learning_rate: 0.01 units_0: 90 act_0: relu dropout: 0.5 Score: 0.8803333342075348 Trial summary Hyperparameters: units_in: 70 act_in: elu num_layers: 1 learning_rate: 0.005 units_0: 10 act_0: relu dropout: 0.3 Score: 0.879666656255722 Trial summary Hyperparameters: units_in: 50 act_in: elu num_layers: 1 learning_rate: 0.0005 units_0: 30 act_0: relu dropout: 0.5 Score: 0.8720000088214874 Trial summary Hyperparameters: units_in: 60 act_in: relu num_layers: 0 learning_rate: 0.0001 units_0: 60 act_0: elu dropout: 0.2 Score: 0.8146666586399078 None
# Best hyperparameters found by Randomized Search - this can change
# on individual machines.
best_hps_rs = tunerRS.get_best_hyperparameters()[0]
print(best_hps_rs.values)
{'units_in': 90, 'act_in': 'elu', 'num_layers': 1, 'learning_rate': 0.005, 'units_0': 70, 'act_0': 'relu', 'dropout': 0.30000000000000004}
# Architecture of the best Random Search model. Model.summary() prints
# the table itself and returns None, so the original print() wrapper only
# emitted an extra "None" line after the summary.
tunerRS.get_best_models()[0].summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 90) 1260 _________________________________________________________________ dense_1 (Dense) (None, 70) 6370 _________________________________________________________________ dropout (Dropout) (None, 70) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 71 ================================================================= Total params: 7,701 Trainable params: 7,701 Non-trainable params: 0 _________________________________________________________________ WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1 WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2 WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.learning_rate WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details. None
# Remove any previous Hyperband results so the tuner starts fresh
# (Path.is_dir() already returns False for a nonexistent path).
dirpath = Path('C:/AI-ML/UTA/BankChurn', 'hyperband_search')
if dirpath.is_dir():
    shutil.rmtree(dirpath)
# Configure the Hyperband tuner over the same hypermodel, allocating up
# to 100 epochs per model with an aggressive downsampling factor of 10.
tunerHB = Hyperband(build_model,
                    objective='val_accuracy',
                    max_epochs=100,
                    factor=10,
                    seed=5,
                    directory='C:/AI-ML/UTA/BankChurn',
                    project_name='hyperband_search')
# Show the hyperparameter search space
tunerHB.search_space_summary()
Search space summary
Default search space size: 4
units_in (Int)
{'default': None, 'conditions': [], 'min_value': 10, 'max_value': 100, 'step': 10, 'sampling': None}
act_in (Choice)
{'default': 'relu', 'conditions': [], 'values': ['relu', 'tanh', 'elu'], 'ordered': False}
num_layers (Int)
{'default': None, 'conditions': [], 'min_value': 0, 'max_value': 2, 'step': 1, 'sampling': None}
learning_rate (Choice)
{'default': 0.01, 'conditions': [], 'values': [0.01, 0.005, 0.001, 0.0005, 0.0001], 'ordered': True}
# Searching for best parameters with Hyperband, using the same
# batch-size/epoch budget and validation split as the random search.
tunerHB.search(X_train, y_train,
               batch_size=1000, epochs=200,
               validation_data=(X_val, y_val))
Trial 131 Complete [00h 00m 08s] val_accuracy: 0.8726666569709778 Best val_accuracy So Far: 0.8786666393280029 Total elapsed time: 00h 03m 01s INFO:tensorflow:Oracle triggered exit
# Summary of best Hyperband results. results_summary() prints directly
# and returns None, so the original print() wrapper only added a stray
# "None" line to the output.
tunerHB.results_summary()
Results summary Results in C:/AI-ML/UTA/BankChurn\hyperband_search Showing 10 best trials Objective(name='val_accuracy', direction='max') Trial summary Hyperparameters: units_in: 100 act_in: tanh num_layers: 1 learning_rate: 0.005 units_0: 60 act_0: elu dropout: 0.4 units_1: 40 act_1: relu tuner/epochs: 100 tuner/initial_epoch: 10 tuner/bracket: 1 tuner/round: 1 tuner/trial_id: 0a1ae695bb5b244563785afb0e45bf90 Score: 0.8786666393280029 Trial summary Hyperparameters: units_in: 40 act_in: tanh num_layers: 1 learning_rate: 0.01 units_0: 20 act_0: tanh dropout: 0.2 units_1: 80 act_1: elu tuner/epochs: 100 tuner/initial_epoch: 10 tuner/bracket: 1 tuner/round: 1 tuner/trial_id: b4626ce79ce4f3c8ea03eabe986f6a9f Score: 0.8773333430290222 Trial summary Hyperparameters: units_in: 100 act_in: elu num_layers: 2 learning_rate: 0.01 units_0: 40 act_0: elu dropout: 0.0 units_1: 10 act_1: tanh tuner/epochs: 100 tuner/initial_epoch: 10 tuner/bracket: 2 tuner/round: 2 tuner/trial_id: c4c1f6ce91ed482ed7eacbe31a117c6e Score: 0.8759999871253967 Trial summary Hyperparameters: units_in: 100 act_in: elu num_layers: 2 learning_rate: 0.01 units_0: 40 act_0: elu dropout: 0.0 units_1: 10 act_1: tanh tuner/epochs: 10 tuner/initial_epoch: 1 tuner/bracket: 2 tuner/round: 1 tuner/trial_id: 7b8c0310b9512e568e9d0cb836751fcf Score: 0.874666690826416 Trial summary Hyperparameters: units_in: 50 act_in: relu num_layers: 1 learning_rate: 0.01 units_0: 60 act_0: relu dropout: 0.0 units_1: 30 act_1: relu tuner/epochs: 10 tuner/initial_epoch: 1 tuner/bracket: 2 tuner/round: 1 tuner/trial_id: 437c101b9d49ca6eb9d2d7852cf528c7 Score: 0.8740000128746033 Trial summary Hyperparameters: units_in: 50 act_in: relu num_layers: 0 learning_rate: 0.01 units_0: 10 act_0: tanh dropout: 0.30000000000000004 units_1: 100 act_1: elu tuner/epochs: 100 tuner/initial_epoch: 0 tuner/bracket: 0 tuner/round: 0 Score: 0.8733333349227905 Trial summary Hyperparameters: units_in: 50 act_in: relu num_layers: 2 learning_rate: 0.005 
units_0: 90 act_0: tanh dropout: 0.0 units_1: 30 act_1: elu tuner/epochs: 100 tuner/initial_epoch: 0 tuner/bracket: 0 tuner/round: 0 Score: 0.8726666569709778 Trial summary Hyperparameters: units_in: 60 act_in: elu num_layers: 1 learning_rate: 0.01 units_0: 40 act_0: relu dropout: 0.0 units_1: 80 act_1: relu tuner/epochs: 10 tuner/initial_epoch: 1 tuner/bracket: 2 tuner/round: 1 tuner/trial_id: 58b1aa72cd194be332cfa158944f5c7f Score: 0.871999979019165 Trial summary Hyperparameters: units_in: 100 act_in: relu num_layers: 2 learning_rate: 0.005 units_0: 60 act_0: tanh dropout: 0.1 units_1: 30 act_1: relu tuner/epochs: 10 tuner/initial_epoch: 1 tuner/bracket: 2 tuner/round: 1 tuner/trial_id: 38924c7aa4b4331f3790e5af3c837fb0 Score: 0.8713333606719971 Trial summary Hyperparameters: units_in: 40 act_in: elu num_layers: 2 learning_rate: 0.0005 units_0: 50 act_0: relu dropout: 0.1 units_1: 70 act_1: tanh tuner/epochs: 100 tuner/initial_epoch: 0 tuner/bracket: 0 tuner/round: 0 Score: 0.8693333268165588 None
# Best hyperparameters found by the Hyperband search.
best_hps_hb = tunerHB.get_best_hyperparameters()[0]
print(best_hps_hb.values)
{'units_in': 100, 'act_in': 'tanh', 'num_layers': 1, 'learning_rate': 0.005, 'units_0': 60, 'act_0': 'elu', 'dropout': 0.4, 'units_1': 40, 'act_1': 'relu', 'tuner/epochs': 100, 'tuner/initial_epoch': 10, 'tuner/bracket': 1, 'tuner/round': 1, 'tuner/trial_id': '0a1ae695bb5b244563785afb0e45bf90'}
# Architecture of the best Hyperband model. Model.summary() prints the
# table itself and returns None, so the original print() wrapper only
# emitted an extra "None" line after the summary.
tunerHB.get_best_models()[0].summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 100) 1400 _________________________________________________________________ dense_1 (Dense) (None, 60) 6060 _________________________________________________________________ dropout (Dropout) (None, 60) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 61 ================================================================= Total params: 7,521 Trainable params: 7,521 Non-trainable params: 0 _________________________________________________________________ WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1 WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2 WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.learning_rate WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details. None
**Observation:**
# Evaluate the best Random Search model on the held-out test set.
best_model, = tunerRS.get_best_models(num_models=1)
results = best_model.evaluate(X_test, y_test)
79/79 [==============================] - 0s 692us/step - loss: 0.3610 - accuracy: 0.8371
# Evaluate the best Hyperband model on the held-out test set.
best_model, = tunerHB.get_best_models(num_models=1)
results = best_model.evaluate(X_test, y_test)
79/79 [==============================] - 0s 705us/step - loss: 0.3528 - accuracy: 0.8495
# Evaluate the original base model on the test set for comparison.
results = model.evaluate(x=X_test, y=y_test)
79/79 [==============================] - 0s 705us/step - loss: 0.3548 - accuracy: 0.8556
**Observation:**
# Predicting the target variable on the test set.
# NOTE: Sequential.predict_classes() was removed in TensorFlow 2.6; for a
# binary sigmoid output, thresholding predict() at 0.5 reproduces its
# behavior exactly.
y_predict = (model.predict(X_test, batch_size=1000, verbose=0) > 0.5).astype("int32")
# Printing different classification metrics for the base model.
print('Accuracy Model: '+ str(model.evaluate(X_test,y_test)[1]))
print('Recall_score: ' + str(recall_score(y_test,y_predict)))
print('Precision_score: ' + str(precision_score(y_test, y_predict)))
print('F-score: ' + str(f1_score(y_test,y_predict)))
79/79 [==============================] - 0s 613us/step - loss: 0.3548 - accuracy: 0.8556 Accuracy Model: 0.8555999994277954 Recall_score: 0.46706586826347307 Precision_score: 0.7134146341463414 F-score: 0.5645355850422196
# AUC-ROC curve for the base neural network.
# Fixes: (1) Sequential.predict_proba() was removed in TensorFlow 2.6 -
# predict() already returns the sigmoid probability for this binary model;
# (2) the curve label wrongly said 'Logistic Regression' although this is
# the neural network; (3) AUC is now computed from predicted probabilities
# rather than hard 0/1 labels, the correct input for a threshold-free metric.
y_prob = model.predict(X_test).ravel()
logit_roc_auc = roc_auc_score(y_test, y_prob)
fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)
plt.figure(figsize=(10,8))
plt.plot(fpr, tpr, label='Neural Network (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([-0.01, 1.0])
plt.ylim([-0.01, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# Heatmap of the confusion matrix: rows = observed class, columns = predicted.
cm = confusion_matrix(y_test, y_predict)
plt.figure(figsize=(6, 4))
ax = sns.heatmap(cm, annot=True, annot_kws={"size": 15}, fmt='.0f')
ax.set_ylabel('Observed', size=15)
ax.set_xlabel('Predicted', size=15)
plt.xticks(size=25, color='blue')
plt.yticks(size=25, color='blue')